Prediction using the bottom-up method

This notebook details the process of predicting which homework a notebook came from, after featurizing each notebook with the bottom-up method. The steps are: run the algorithm and gather all templates found in each notebook, featurize the notebooks with a CountVectorizer over those templates, and finally train a random forest to make the prediction.
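To make the flow concrete before walking through the real cells, here is a minimal, self-contained sketch of the same idea on made-up template lists (the data and names below are illustrative only, not the notebook's actual corpus):

import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# Toy corpus: each "notebook" is a list of template ids, labeled by homework.
toy_notebooks = [
    ['template_1', 'template_2', 'template_2'],
    ['template_1', 'template_3'],
    ['template_4', 'template_5', 'template_4'],
    ['template_4', 'template_5'],
] * 5
toy_labels = np.array([0, 0, 1, 1] * 5)

# Featurize: one column per template, counts per notebook.
toy_docs = [' '.join(t) for t in toy_notebooks]
toy_X = CountVectorizer().fit_transform(toy_docs)

# Classify which homework each notebook came from and evaluate with cross-validation.
toy_clf = RandomForestClassifier(n_estimators=100)
print(cross_val_score(toy_clf, toy_X, toy_labels, cv=5).mean())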


In [3]:
import sys
home_directory = '/dfs/scratch2/fcipollone'
sys.path.append(home_directory)
import numpy as np
from nbminer.notebook_miner import NotebookMiner

# Load the combined per-student homework filenames and parse up to 59 notebooks per homework.
hw_filenames = np.load('../homework_names_jplag_combined_per_student.npy')
hw_notebooks = [[NotebookMiner(filename) for filename in temp[:59]] for temp in hw_filenames]

In [8]:
from nbminer.pipeline.pipeline import Pipeline
from nbminer.features.features import Features
from nbminer.preprocess.get_ast_features import GetASTFeatures
from nbminer.preprocess.get_imports import GetImports
from nbminer.preprocess.resample_by_node import ResampleByNode
from nbminer.encoders.ast_graph.ast_graph import ASTGraphReducer
from nbminer.preprocess.feature_encoding import FeatureEncoding
from nbminer.encoders.cluster.kmeans_encoder import KmeansEncoder
from nbminer.results.similarity.jaccard_similarity import NotebookJaccardSimilarity
from nbminer.results.prediction.corpus_identifier import CorpusIdentifier
a = Features(hw_notebooks[2], 'hw2')
a.add_notebooks(hw_notebooks[3], 'hw3')
a.add_notebooks(hw_notebooks[4], 'hw4')
a.add_notebooks(hw_notebooks[5], 'hw5')
# Pipeline stages: extract AST features, resample by node, collect imports, reduce
# ASTs to templates (bottom-up), and record each notebook's templates and homework label.
gastf = GetASTFeatures()
rbn = ResampleByNode()
gi = GetImports()
agr = ASTGraphReducer(a, threshold=8, split_call=False)
ci = CorpusIdentifier()
pipe = Pipeline([gastf, rbn, gi, agr, ci])
a = pipe.transform(a)


<nbminer.preprocess.get_ast_features.GetASTFeatures object at 0x7f6988905be0>
236
<nbminer.preprocess.resample_by_node.ResampleByNode object at 0x7f6a369531d0>
236
<nbminer.preprocess.get_imports.GetImports object at 0x7f6988905198>
236
<nbminer.encoders.ast_graph.ast_graph.ASTGraphReducer object at 0x7f698885c8d0>
236
<nbminer.results.prediction.corpus_identifier.CorpusIdentifier object at 0x7f6988874828>
236

In [10]:
import tqdm
X, y = ci.get_data_set()
# Pairwise Jaccard similarity between notebooks, treating each notebook as a set of templates.
X_sets = [set(el) for el in X]
similarities = np.zeros((len(X), len(X)))
for i in tqdm.tqdm(range(len(X))):
    for j in range(len(X)):
        union = X_sets[i] | X_sets[j]
        if len(union) == 0:
            continue
        similarities[i][j] = len(X_sets[i] & X_sets[j]) / len(union)


100%|██████████| 236/236 [00:02<00:00, 94.91it/s]

Inter and Intra Similarities

The first measure we can use to check that something reasonable is happening is, for each homework, the average similarity of two notebooks both pulled from that homework, compared with the average similarity over all remaining pairs of notebooks. These values are printed below.
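The similarity computed in the cell above is the Jaccard index between the template sets of two notebooks,

$$J(A, B) = \frac{|A \cap B|}{|A \cup B|},$$

so the intra value averages $J$ over pairs drawn from the same homework, while the inter value averages it over all remaining pairs.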


In [11]:
def get_avg_inter_intra_sims(X, y, val):
    # intra: pairs where both notebooks come from homework `val`; inter: all other pairs.
    inter_sims = []
    intra_sims = []
    for i in range(len(X)):
        for j in range(i+1, len(X)):
            if y[i] == y[j] and y[i] == val:
                intra_sims.append(similarities[i][j])
            else:
                inter_sims.append(similarities[i][j])
    return np.array(intra_sims), np.array(inter_sims)

for i in np.unique(y):
    intra_sims, inter_sims = get_avg_inter_intra_sims(X, y, i)
    print('Mean intra similarity for hw',i,'is',np.mean(intra_sims),'with std',np.std(intra_sims))
    print('Mean inter similarity for hw',i,'is',np.mean(inter_sims),'with std',np.std(inter_sims))
    print('----')


Mean intra similarity for hw 0 is 0.325629689953161 with std 0.10830939961936226
Mean inter similarity for hw 0 is 0.3334252638462146 with std 0.08736727777788884
----
Mean intra similarity for hw 1 is 0.3603492301629332 with std 0.08769050925757028
Mean inter similarity for hw 1 is 0.33114211947867855 with std 0.0885997181712564
----
Mean intra similarity for hw 2 is 0.381979672897434 with std 0.08281964553335316
Mean inter similarity for hw 2 is 0.3297197094122375 with std 0.088253305539923
----
Mean intra similarity for hw 3 is 0.38808668441538596 with std 0.08481056917374301
Mean inter similarity for hw 3 is 0.32931811455051274 with std 0.08787554208923494
----

In [15]:
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = 5, 15
def get_all_sims(X, y, val):
    # All pairwise similarities involving at least one notebook from homework `val`.
    sims = []
    for i in range(len(X)):
        for j in range(i+1, len(X)):
            if y[i] == val or y[j] == val:
                sims.append(similarities[i][j])
    return sims
fig, axes = plt.subplots(4)
for i in range(4):
    axes[i].hist(get_all_sims(X,y,i), bins=30)
    axes[i].set_xlabel("Similarity Value")
    axes[i].set_ylabel("Number of pairs")


Actual Prediction

While the above results are helpful, a proper classifier can make use of more of the available information. The setup (a minimal sketch with an explicit train/test split follows the list) is as follows:

  1. Split the data into train and test
  2. Vectorize each notebook based on the templates it contains
  3. Build a random forest classifier on this feature representation, and measure its performance
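The cells below use 10-fold cross-validation rather than a single split; for completeness, here is a minimal sketch of the same three steps with an explicit train/test split (variable names are illustrative):

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

# Raw data: per-notebook template lists and homework labels, as returned by ci.get_data_set().
X_raw, y_raw = ci.get_data_set()
docs = [" ".join(el) for el in X_raw]

# 1. Split the data into train and test.
docs_train, docs_test, y_train, y_test = train_test_split(docs, y_raw, test_size=0.2, random_state=0)

# 2. Vectorize based on the templates seen in the training set.
vec = CountVectorizer().fit(docs_train)
X_train, X_test = vec.transform(docs_train), vec.transform(docs_test)

# 3. Build a random forest on this representation and measure held-out accuracy.
clf_split = RandomForestClassifier(n_estimators=400, max_depth=3).fit(X_train, y_train)
print(accuracy_score(y_test, clf_split.predict(X_test)))
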

In [16]:
import sklearn
import sklearn.ensemble
import sklearn.feature_extraction.text
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score

X, y = ci.get_data_set()
countvec = sklearn.feature_extraction.text.CountVectorizer()
X_list = [" ".join(el) for el in X]
countvec.fit(X_list)
X = countvec.transform(X_list)

# Shuffle the notebooks so the cross-validation folds mix all four homeworks.
p = np.random.permutation(X.shape[0])
X = X.todense()[p]
y = np.array(y)[p]

clf = sklearn.ensemble.RandomForestClassifier(n_estimators=400, max_depth=3)
scores = cross_val_score(clf, X, y, cv=10)
print(scores)
print(np.mean(scores))


[0.75       0.83333333 0.83333333 0.79166667 0.95833333 0.83333333
 0.83333333 0.75       0.75       0.9       ]
0.8233333333333335

In [17]:
from sklearn.ensemble import AdaBoostClassifier
clf = sklearn.ensemble.AdaBoostClassifier(n_estimators=700)
scores = cross_val_score(clf, X, y, cv=10)
print(scores)
print(np.mean(scores))


[0.83333333 0.75       0.75       0.625      0.79166667 0.625
 0.70833333 0.79166667 0.66666667 0.9       ]
0.7441666666666668

In [18]:
X.shape


Out[18]:
(236, 138)

Clustering

Lastly, we try unsupervised learning: clustering the notebooks based on the features we've extracted, and measuring the result with the silhouette score.
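For reference, the silhouette score (standard definition, not specific to this notebook) averages

$$s(i) = \frac{b(i) - a(i)}{\max\{a(i),\, b(i)\}}$$

over all samples $i$, where $a(i)$ is the mean distance from $i$ to the other members of its own cluster and $b(i)$ is the mean distance from $i$ to the nearest other cluster. Values near 1 indicate tight, well-separated clusters; values near 0 or below indicate heavy overlap.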


In [71]:
X, y = ci.get_data_set()
countvec = sklearn.feature_extraction.text.CountVectorizer()
X_list = [" ".join(el) for el in X]
countvec.fit(X_list)
X = countvec.transform(X_list)

In [72]:
import sklearn.cluster
import sklearn.metrics

clusterer = sklearn.cluster.KMeans(n_clusters=4).fit(X)
cluster_score = sklearn.metrics.silhouette_score(X, clusterer.labels_)
cheat_score = sklearn.metrics.silhouette_score(X, y)

print('Silhouette score using the actual labels:', cheat_score)
print('Silhouette score using the cluster labels:', cluster_score)


Silhouette score using the actual labels: -0.046440606405044764
Silhouette score using the cluster labels: 0.3239301349071774

In [73]:
import sklearn.decomposition

x_reduced = sklearn.decomposition.PCA(n_components=2).fit_transform(X.todense())
plt.rcParams['figure.figsize'] = 5, 10
fig, axes = plt.subplots(2)
axes[0].scatter(x_reduced[:,0], x_reduced[:,1], c=y)
axes[0].set_title('PCA Reduced notebooks with original labels')
axes[1].scatter(x_reduced[:,0], x_reduced[:,1], c=clusterer.labels_)
axes[1].set_title('PCA Reduced notebooks with kmeans cluster labels')


Out[73]:
Text(0.5,1,'PCA Reduced notebooks with kmeans cluster labels')

Trying to restrict features

The problem above is that there are too many unimportant features: all this noise makes it hard to separate the different classes. To try to counteract this, I'll rank the features using TF-IDF and keep only the top few.
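One way to do this kind of ranking, shown here only as a sketch (the cells below instead pull the top entries straight out of the TF-IDF matrix; variable names here are illustrative), is to score each template by its mean TF-IDF weight across notebooks and keep the top k:

from sklearn.feature_extraction.text import TfidfVectorizer

# Score each template by its average TF-IDF weight over all notebooks and keep the top k.
X_raw, y_raw = ci.get_data_set()
docs = [" ".join(el) for el in X_raw]

vectorizer = TfidfVectorizer()
weights = vectorizer.fit_transform(docs)                 # shape: (n_notebooks, n_templates)
mean_weight = np.asarray(weights.mean(axis=0)).ravel()   # average weight per template
feature_names = np.array(vectorizer.get_feature_names())

k = 4
top_k = feature_names[np.argsort(mean_weight)[::-1][:k]]
print(top_k)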


In [221]:
X, y = ci.get_data_set()
tfidf = sklearn.feature_extraction.text.TfidfVectorizer()
X_list = [" ".join(el) for el in X]
tfidf.fit(X_list)
X = tfidf.transform(X_list)
#X = X.todense()

In [222]:
feature_array = np.array(tfidf.get_feature_names())
tfidf_sorting = np.argsort(X.toarray()).flatten()[::-1]
top_n = feature_array[tfidf_sorting][:4]
print(top_n)


['template_50' 'template_394' 'template_1902' 'template_294']

In [223]:
X, y = ci.get_data_set()
countvec = sklearn.feature_extraction.text.CountVectorizer()

X_list = [" ".join([val for val in el if val in top_n]) for el in X]
countvec.fit(X_list)
X = countvec.transform(X_list)
X = X.todense()

In [237]:
np.array([X[n,0] for n in range(len(X))]).shape


Out[237]:
(236,)

In [240]:
x_reduced = sklearn.decomposition.PCA(n_components=2).fit_transform(X)
print(x_reduced.shape)
plt.rcParams['figure.figsize'] = 5, 5
plt.scatter(x_reduced[:,0], x_reduced[:,1], c=y)


(236, 2)
Out[240]:
<matplotlib.collections.PathCollection at 0x7f697b9ebeb8>

What's happening

Figuring out exactly what is going on is a bit difficult, but we can look at the templates the random forest ranks as most important and see why they might have been chosen.


In [19]:
'''
Looking at the output below, it's clear that the bottom-up method is recognizing very specific
AST structures, which makes sense because some structures are repeated exactly across the
notebooks for a given homework. For example:

treatment = pd.Series([0]*4 + [1]*2)

is a line in all of the homework one notebooks.

Example function call lines:
var = call(a_bunch_of_operations)
var = a[10] + call(10)
var = sklearn.linear_model.linear_regression(X[a:b], y[a:b])
'''

# Fit the classifier, rank the templates by feature importance (most important first),
# and print a few example source lines for each template.
clf.fit(X, y)
fnames = countvec.get_feature_names()
clfi = clf.feature_importances_
sa = []
for i in range(len(clfi)):
    sa.append((clfi[i], fnames[i]))
sra = [el for el in reversed(sorted(sa))]
import astor
for temp in sra:
    temp = temp[1]
    print(temp, agr.templates.get_examples(temp)[1])
    for i in range(5):
        print('\t', astor.to_source(agr.templates.get_examples(temp)[0][i]))


template_72 1131
	 from module import import as import

	 from module import import as import

	 from module import import as import

	 from module import import as import

	 from module import import as import

template_1056 353
	 var, var = stats.mannwhitneyu(*var)

	 var, var = stats.ks_2samp(*var)

	 var, var = stats.mannwhitneyu(var, var)

	 var, var = stats.ks_2samp(var, var)

	 var, var = var.split('string')

template_294 530
	 var

	 var

	 var

	 var

	 var

template_50 9859
	 var().run_line_magic('string', 'string')

	 sns.set_context('string')

	 print(len(var))

	 print('string'.format(var, var.attrs['string']))

	 print(var.attrs['string'] + 'string')

template_385 468
	 var = 'string'

	 var = 'string'

	 var = 'string'

	 var = 'string'

	 var = 'string'

template_48 1571
	 import import as import

	 import import as import

	 import import as import

	 import import as import

	 import import as import

template_997 479
	 var['string'] = int(var)

	 var['string'] = int(var)

	 var['string'] = var.apply(var, axis=1, args=(var,))

	 var['string'] = var['string'].fillna(value=0)

	 var['string'] = var['string'].fillna(value=0)

template_887 192
	 var.shape

	 var.shape

	 var.shape

	 var.shape

	 var.shape

template_394 8418
	 var = rq.get('string')

	 var = bs4.BeautifulSoup(var.text, 'string')

	 var = var.find_all('string')

	 var = var.find_all('string')

	 var = rq.get(var, params=var)

template_390 235
	 var = var

	 var = var

	 var = var

	 var = var

	 var = var

template_1050 155
	 var['string'] = var

	 var['string'] = var

	 var['string'] = var

	 var['string'] = var

	 var['string'] = var

template_1845 22
	 var['string'] = var['string'] / var['string']

	 var['string'] = var['string'] / var['string']

	 var['string'] = var['string'] / var['string']

	 var['string'] = var['string'] / var['string']

	 var['string'] = var['string'] / var['string']

template_1791 198
	 var = [var.text.strip() for var in var]

	 var = [var.text.strip() for var in var]

	 var = [int(var.xs(var, level='string').index[0][0].split('string')[0]) for
    var in var]

	 var = [int(var.xs(var, level='string').index[0][1].split('string')[-1]) for
    var in var]

	 var = [int(var.xs(var, level='string').index[-1][0].split('string')[0]) for
    var in var]

template_1069 127
	 var.columns = ['string', 'string', 'string', 'string', 'string', 'string']

	 var.columns = ['string', 'string', 'string', 'string', 'string', 'string']

	 var.columns = ['string', 'string']

	 var.columns = ['string']

	 var.columns = ['string']

template_1815 112
	 var[var] = var(var, var)

	 var[var] = var(var, var, var, var)

	 var[var] = var(var, var, var, var)

	 var[var] = var(var, var)

	 var[var] = stats.ttest_1samp(a=var, popmean=var)

template_1037 183
	 var = var['string']

	 var = var['string']

	 var = var['string']

	 var = var['string']

	 var = var['string']

template_998 99
	 var.Year = var[0].astype(int)

	 var.Year = np.where(var['string'] == 2, var[0].astype(int), var[1].astype(int))

	 var.columns = var.columns.droplevel(0)

	 var.columns = var.columns.droplevel(0)

	 var.columns = var.columns.droplevel(0)

template_383 618
	 var = []

	 var = []

	 var = []

	 var = []

	 var = []

template_1075 133
	 var = var[var.specialization.notnull()]

	 var = var[var['string'].isin(var['string'].tolist())]

	 var = var[var['string'].isin(var['string'].tolist())]

	 var = var[var.ww_x_PERIODE_PEDAGO.isin(['string', 'string'])]

	 var = var[var.ww_x_PERIODE_PEDAGO.isin(['string'])]

template_675 41
	 for var, var in enumerate(var):
    print('string'.format(var, var.attrs['string']))

	 for var, var in var.items():
    print(var, 'string', var[1])

	 for var, var in var.items():
    print(var)

	 for var, var in var.items():
    print(var)

	 for var, var in var.items():
    print(var, 'string', var[1])

template_395 258
	 var = {}

	 var = {}

	 var = {}

	 var = {}

	 var = {}

template_382 292
	 var = ['string', 'string', 'string', 'string']

	 var = ['string', 'string', 'string', 'string']

	 var = ['string', 'string']

	 var = ['string', 'string', 'string', 'string', 'string']

	 var = ['string', 'string', 'string', 'string', 'string']

template_1838 15
	 var = pd.read_excel('string')[['string', 'string']]

	 var = pd.merge(var, var, left_on='string', right_on='string', how='string')[
    ['string', 'string']]

	 var = pd.merge(var, var, on='string', how='string')[['string', 'string',
    'string']]

	 var = var.loc[(var['string'] != 'string') & (var['string'] == 'string')
    ].groupby('string').sum()[['string']]

	 var = var.groupby(['string']).sum()[['string']]

template_379 64
	 var = {'string': 'string', 'string': 'string'}

	 var = {'string': 'string', 'string': 'string'}

	 var = {'string': 'string', 'string': 'string', 'string': 'string', 'string':
    'string', 'string': 'string', 'string': 'string', 'string': 'string',
    'string': 'string', 'string': 'string', 'string': 'string'}

	 var = {'string': 'string', 'string': 'string', 'string': 'string', 'string':
    'string', 'string': 'string', 'string': 'string', 'string': 'string',
    'string': 'string'}

	 var = {'string': 'string', 'string': 'string', 'string': 'string', 'string':
    'string', 'string': 'string', 'string': 'string', 'string': 'string',
    'string': 'string', 'string': 'string'}

template_1021 27
	 var['string'] = var.index

	 var['string'] = var.index

	 var['string'] = var.index

	 var['string'] = var.index

	 var['string'] = var.values

template_1001 125
	 var['string'] = 'string'

	 var['string'] = 'string'

	 var['string'] = 'string'

	 var['string'] = 'string'

	 var['string'] = 'string'

template_903 14
	 def get_basic_info(acad_unit, acad_year, semester, semester_type):
    var = var(var, var, var, var)
    var = var(var)
    return var

	 def df_join(df, duration_df, attribute):
    var = var.reset_index()
    var = var.set_index(['string']).drop_duplicates()
    var = var.reset_index()
    var = var.set_index(['string'])
    var = pd.merge(var, var, right_index=True, left_index=True)
    var = var.reset_index()
    var = var[['string', var, 'string', 'string']].drop_duplicates()
    return var

	 def parse_table(soup):
    var = var.find('string')
    var = var.find_all('string')
    return var

	 def compute_intersection_first_last_semester(df, first, last):
    var = set(var.index).intersection(set(var.index))
    var = var[var.index.isin(var)].sort_index()
    return var

	 def compute_semester_count(intersection):
    var = var.reset_index()[['string', 'string']].groupby(['string']).count()
    var = var.rename(columns={'string': 'string'})
    var = var.merge(var, left_index=True, right_index=True)
    return var

template_1893 23
	 var = var.attrs['string']

	 var = var.attrs['string']

	 var = var.attrs['string']

	 var = var.loc['string']

	 var = var.loc['string']

template_1887 19
	 var[['string']] = var[['string']].apply(pd.to_numeric)

	 var[['string', 'string']] = var[['string', 'string']].apply(pd.to_numeric)

	 var[['string', 'string']] = var[['string', 'string']].apply(pd.to_numeric)

	 var[['string', 'string', 'string']] = var[['string', 'string', 'string']
    ].astype(int)

	 var[['string', 'string']] = var[['string', 'string']].apply(pd.to_numeric)

template_2540 33
	 var = var[var['string'] != 'string']

	 var = var[var['string'] != 'string']

	 var = var[var['string'] != 'string']

	 var = var[var['string'] != 'string']

	 var = var[var['string'] != 'string']

template_2367 15
	 var['string'] = (var['string'] + var['string']) / 2

	 var['string'] = (var['string'] + var['string']) / 2

	 var['string'] = (var['string'] + var['string']) / 2

	 var['string'] = (var['string'] + var['string']) / 2

	 var['string'] = (var['string'] + var['string']) / 2

template_1852 60
	 var = ['string'.format(var + 1) for var in range(6)]

	 var = ['string'.format(var + 1) for var in range(6)]

	 var = [var.findAll('string') for var in var.findAll('string')]

	 var = [var.findAll('string') for var in var.findAll('string')]

	 var = [var.findAll('string') for var in var.findAll('string')]

template_1049 80
	 var = var[0]

	 var = var[1]

	 var = var[0]

	 var = var[1]

	 var = var[0]

template_1025 184
	 var = var.contents

	 var = var.text

	 var = var.children

	 var = var.content

	 var = var.index

template_888 56
	 var[0:5]

	 var[:15]

	 var[:15]

	 var[:15]

	 var[:15]

template_377 417
	 var = 0.1

	 var = 0

	 var = 0.05

	 var = 7

	 var = 0

template_359 100
	 for var in range(var):
    var[var[var]].append(var[var].text)

	 for var in range(len(var)):
    print(str(var) + 'string' + var[var].string + 'string', end='string')
    print(var[var].string)

	 for var in range(len(var) - 2):
    var[var[var].string].append(var[var].string)

	 for var in range(2007, 2017):
    var.update(var(var[str(var) + 'string' + str(var + 1)]))

	 for var in range(2007, 2017):
    var.update(var(var[str(var) + 'string' + str(var + 1)]))

template_2591 16
	 var = var[var['string'] == 'string']['string']

	 var = var[var['string'] == 'string']['string']

	 var = var[var['string'] == 'string']['string']

	 var = var[var['string'] == 'string']['string']

	 var = var[var['string'] == 'string']['string']

template_2548 47
	 var = var[var['string'] == 'string']

	 var = var[var['string'] == 'string']

	 var = var[var['string'] == 'string']

	 var = var[var['string'] == 'string']

	 var = var[var['string'] == 'string']

template_1905 145
	 var = var[['string', 'string', 'string']]

	 var = var[['string', 'string', 'string']]

	 var = var[['string', 'string', 'string', 'string', 'string', 'string']]

	 var = var[['string', 'string']]

	 var = var[['string', 'string']]

template_1805 41
	 var = var[~pd.isnull(var['string'])]

	 var = var[~var['string'].isin(var)]

	 var = var[~var['string'].isin(var)]

	 var = var[~var.index.duplicated(keep='string')]

	 var = var[~var.index.duplicated(keep='string')]

template_1032 65
	 var = var.groupby('string')['string']

	 var = var.groupby('string')['string']

	 var = var.groupby('string')['string']

	 var = var.get_group(var)['string']

	 var = var.xs('string', level='string')['string']

template_2541 27
	 var = var[var.ww_x_PERIODE_PEDAGO == 'string']

	 var = var[var.ww_x_PERIODE_PEDAGO == 'string']

	 var = var[var.gender == 'string']

	 var = var[var.gender == 'string']

	 var = var[var.gender == 'string']

template_2525 9
	 var[var['string'] == 'string']

	 var[var['string'] == 'string']

	 var[var['string'] == 'string']

	 var[var['string'] == 'string']

	 var[var['string'] == 'string']

template_1897 36
	 var = list(map(lambda l: var.text, var.children))[:-1]

	 var = np.argsort(var)[::-1]

	 var = np.argsort(var)[::-1]

	 var = np.argsort(var)[::-1]

	 var = np.argsort(var)[::-1]

template_1867 16
	 var = var[var['string'].isin(var) == False]

	 var = var[pd.to_numeric(var['string'], errors='string').isnull() == False]

	 var = var[var['string'].str.contains('string') == False]

	 var = var[var['string'].str.contains('string') == False]

	 var = var[var['string'].str.contains('string') == True]

template_1856 14
	 var.loc['string'] = var(var)

	 var.loc['string'] = var(var)

	 var.loc['string'] = var(var)

	 var.loc['string'] = var(var)

	 var.loc['string'] = var(var)

template_1842 9
	 var = var[pd.isnull(var['string']) & pd.isnull(var['string'])]

	 var = var[var['string'].isnull() & var['string'].isnull()]

	 var = var[pd.isnull(var['string']) & pd.isnull(var['string'])]

	 var = var[var.University.isnull() & var.Institution.notnull()]

	 var = var[var['string'].notnull() & var['string'].notnull()]

template_1819 9
	 var = var.copy()[var]

	 var = var.Canton.unique()[var]

	 var = var.groupby('string').agg('string')[var]

	 var = var.set_index(['string'], drop=False)[var]

	 var = np.array(var)[var]

template_1804 31
	 var.index.name = 'string'

	 var.index.name = 'string'

	 var.index.name = 'string'

	 var.columns.name = 'string'

	 var.index.name = 'string'

template_1691 25
	 var.ix[:5, :14]

	 var.ix[:5, 14:]

	 var.ix[100:105, :12]

	 var.ix[100:105, 12:]

	 var.ix[:10, :14]

template_955 36
	 del var['string']

	 del var['string'], var['string'], var['string'], var['string'], var['string'
    ], var['string'], var['string']

	 del var['string']

	 del var['string']

	 del var['string']

template_51 234
	 """string"""

	 """string"""

	 """string"""

	 """string"""

	 """string"""

template_388 45
	 var = [0, 0, 0]

	 var = [0, 0, 0]

	 var = [978181, 978187, 978195, 39486325, 123455150, 123456101, 213637754, 
    213637922, 213638028, 355925344]

	 var = [249108, 942120, 942175]

	 var = [978181, 978187, 978195, 39486325, 123455150, 123456101, 213637754, 
    213637922, 213638028, 355925344]

template_374 9
	 var = {'string': var.mean(), 'string': var.mean()}

	 var = {'string': 'string', 'string': 'string', 'string': 'string', 'string':
    str(None), 'string': 'string'}

	 var = {'string': folium.Map(location=[46.8, 8.1], zoom_start=8), 'string':
    folium.Map(location=[46.8, 8.1], zoom_start=8), 'string': folium.Map(
    location=[46.8, 8.1], zoom_start=8), 'string': folium.Map(location=[
    46.8, 8.1], zoom_start=8), 'string': folium.Map(location=[46.8, 8.1],
    zoom_start=8)}

	 var = {'string': 'string', 'string': var['string'].sum()}

	 var = {'string': 'string', 'string': var['string'].sum()}

template_301 43
	 def bachelor_duration(row):
    return var(var['string'], var['string'])

	 def master_duration_rough(row):
    return var(var['string'], var['string'])

	 def get_local_table(semester, year, student_type):
    return pd.read_excel('string' + var + 'string' + var + 'string' + var +
        'string').fillna('string')

	 def create_df(url):
    return var(pd.read_html(var)[0])

	 def clean(string):
    return var.strip().lower().replace('string', 'string')

template_2551 24
	 var = var[var.Spécialisation != 'string']

	 var = var[var.sex != 'string']

	 var = var[var.semester != 'string']

	 var = var[var.Statut != 'string']

	 var = var[var.sex != 'string']

template_2377 10
	 var = var.loc[:, (['string'])]

	 var = var.loc[:, (['string'])]

	 var = var.loc[:, (['string'])]

	 var = var.loc[:, (['string'])]

	 var = var.loc[:, (['string', 'string'])]

template_2375 15
	 var['string'].loc[260] = 'string'

	 var['string'].loc[3315] = 'string'

	 var['string'].loc[10947] = 'string'

	 var['string'].loc[26632] = 'string'

	 var['string'].loc[59733] = 'string'

template_1888 19
	 var['string'] = var['string'] * var['string']

	 var['string'] = var['string'] * var['string']

	 var['string'] = var['string'] * var['string']

	 var['string'] = var['string'] * var['string']

	 var['string'] = var['string'] * var['string']

template_1082 40
	 with requests.session() as var:
    var = var.get(var)

	 with var.request.urlopen(var) as var:
    var = var.read()

	 with var.request.urlopen(var) as var:
    html = var.read()

	 with open('string', 'string') as var:
    var = pickle.load(var)

	 with open('string', 'string') as var:
    var = pickle.load(var)

template_1000 35
	 var['string'] = 6

	 var['string'] = 4

	 var['string'] = 4

	 var['string'] = 0.5

	 var['string'] = 0

template_914 16
	 def mine_data(year, semester):
    var = var(var, var)
    return var(var, var, var)

	 def get_html(sem, year):
    var = var(var['string'], var['string'], var, var, var, var, var, var)
    return requests.get(var)

	 def master_time_spec(group):
    var = var(var)
    var = var['string'].replace('string', np.nan)
    var = var.dropna()
    return pd.Series([var, var[0]], index=['string', 'string'])

	 def extract_long_name(entry):
    var = var.rsplit('string', 1)
    return var[0].rstrip()

	 def time_to_seconds(t):
    var = (pd.to_datetime(var) - datetime.datetime(1970, 1, 1)).total_seconds()
    return int(var)

template_898 25
	 var['string']

	 var['string']

	 var['string']

	 var['string']

	 var['string']

template_893 17
	 var[var['string'].isnull()]

	 var[var['string'].str.contains('string')]

	 var[var['string'].notnull()]

	 var[var.Canton.isnull()]

	 var[var.Canton.isnull()]

template_673 12
	 for var in var:
    var = pd.concat([var, var(var, var, var, var)])

	 for var in var:
    var = plt.scatter(var, var * np.ones(len(var)), marker='string', color=
        'string', alpha=0.5, s=var[:, (var - var)] * 500)
    var = plt.scatter(var, var * np.ones(len(var)), marker='string', color=
        'string', alpha=0.5, s=var[:, (var - var)] * 500)

	 for var in var:
    var = plt.scatter(var, var * np.ones(len(var)), marker='string', color=
        'string', alpha=0.5, s=var[:, (var - var)] * 500)
    var = plt.scatter(var, var * np.ones(len(var)), marker='string', color=
        'string', alpha=0.5, s=var[:, (var - var)] * 500)

	 for var in var:
    var = var.combine_first(var)

	 for var in var:
    var = var.combine_first(var)

template_671 43
	 for var in var:
    var = var.findAll('string')
    var['string'].append(int(var[10].get_text()))
    var['string'].append(var[0].get_text())
    var['string'].append(var[1].get_text())
    var['string'].append(var[6].get_text())
    var['string'].append(var[4].get_text())
    var['string'].append(var[7].get_text())
    var['string'].append(var.group(2))
    var['string'].append(var.group(1))

	 for var in var:
    var = var.get_text().strip().replace('string', 'string')
    var.append(var)

	 for var in var:
    var = var[var].notnull().sum()
    print('string', var, 'string', var)

	 for var in var:
    var = var(var, 'string')
    var.append(var)

	 for var in var:
    var = var.find_all('string')
    var.append([var.text for var in var])

template_643 30
	 for var in range(2007, 2017):
    var = var(str(var), 'string')
    var = var.combine_first(var)
    var = var(str(var), 'string')
    var = var(str(var), 'string')
    var.update(var)
    var.update(var)

	 for var in range(2007, 2017):
    var = var(str(var), 'string')
    var = var.combine_first(var)
    var = var(str(var), 'string')
    var = var(str(var), 'string')
    var = var(str(var), 'string')
    var = var(str(var), 'string')
    var.update(var)
    var.update(var)
    var.update(var)
    var.update(var)

	 for var in range(0, len(var)):
    var = var[var].find_all('string')
    var.append([var[0].text, var[1].text.replace('string', 'string'), var[
        10].text, var[7].text])

	 for var in range(0, len(var)):
    var = var[var].find_all('string')
    var.append([var[0].text, var[1].text.replace('string', 'string'), var[
        10].text, var[4].text])

	 for var in var.find_all('string'):
    var = var.find_all('string')
    var.append([var.text.rstrip('string').replace('string', 'string') for
        var in var])

template_639 14
	 for var in var.vertices:
    plt.plot(var[var, 0], var[var, 1], 'string')

	 for var in var.simplices:
    plt.plot(var[var, 0], var[var, 1], 'string')

	 for var in var.vertices:
    var.append([var[var, 0], var[var, 1]])

	 for var in var.index:
    var.append(var[var].loc[var] / var['string'].loc[var])

	 for var in var.columns:
    print(var + 'string' + str(len(var[var[var].isnull()])))

template_423 30
	 if var:
    var['string'].append(var)
    var['string'].append(var[1]['string'])
    var['string'].append(var)

	 if var:
    var.to_excel(var + var)

	 if var:
    var.append({'string': var[0].string, 'string': var[1].string, 'string':
        var[4].string, 'string': var[6].string, 'string': var[10].string})
else:
    var.append({'string': var[0].string, 'string': var[1].string, 'string':
        var[10].string})

	 if var:
    var.pop()
    var.append(var)

	 if var:
    var.append({'string': var[0].get_text(), 'string': var[1].get_text(),
        'string': var[4].get_text(), 'string': var[6].get_text(), 'string':
        var[7].get_text(), 'string': var[10].get_text(), 'string': var(var)
        [0], 'string': var.split('string')[1], 'string': var(var)[1],
        'string': var.split('string')[0]})

template_408 17
	 var = float(var['string']) + 0.5

	 var = float(var['string']) + 0.5

	 var = float(var['string']) + 1

	 var = float(var['string']) + 1

	 var = float(var['string']) + 0.5

template_407 33
	 var = -1

	 var = -1

	 var = -1

	 var = -1

	 var = -1

template_404 21
	 var = sum(var) / len(var)

	 var = sum(var) / len(var)

	 var = var.groupby('string').var() / var.groupby('string').count()

	 var = var.Years.sum() / len(var)

	 var = var.Years.sum() / len(var)

template_403 9
	 var = len(var) - 4

	 var = 1.0 - var.score(var, var)

	 var = 1.0 - var.score(var, var)

	 var = 1.0 - var.score(var, var)

	 var = 1.0 - var.score(var, var)

template_401 17
	 var = {'string': 2021043255, 'string': 133685247, 'string': 133685270,
    'string': 249847, 'string': 355925344, 'string': 249108, 'string': 2936286}

	 var = {'string': 2021043255, 'string': 133685247, 'string': 133685270,
    'string': 249847, 'string': 355925344, 'string': 249108, 'string': 2936286}

	 var = {'string': 355925344, 'string': 213638028, 'string': 213637922,
    'string': 213637754, 'string': 123456101, 'string': 123455150, 'string':
    39486325, 'string': 978195, 'string': 978187, 'string': 978181}

	 var = {'string': 355925344, 'string': 213638028, 'string': 213637922,
    'string': 213637754, 'string': 123456101, 'string': 123455150, 'string':
    39486325, 'string': 978195, 'string': 978187, 'string': 978181}

	 var = {'string': 249108, 'string': 249114, 'string': 942155, 'string': 
    942163, 'string': 942120, 'string': 2226768, 'string': 942175, 'string':
    2226785}

template_387 88
	 var = None

	 var = None

	 var = None

	 var = None

	 var = None

template_381 12
	 var = var.groupby('string').size() * 6

	 var = var.groupby('string').size() * 6

	 var = var.groupby('string').size() * 6

	 var = var.groupby('string').size() * 6

	 var = var[var['string'] == var].groupby('string').size() * 6

template_378 50
	 var = var[var.semesterNumber == 2].dropna(subset=['string']).sciper

	 var = var[var.semesterNumber == 2].dropna(subset=['string']).sciper

	 var = stats.ttest_ind(var['string'], var[var['string'] == var]['string'],
    equal_var=False).pvalue

	 var = requests.get('string', params=var).text

	 var = var['string'].value_counts().index

template_360 69
	 for var in var:
    print(var.find_all('string'))

	 for var in var:
    print(var.td.input['string'])

	 for var in var:
    var.append(var(var(var, var)))

	 for var in var:
    var.append(var(var(var, var)))

	 for var in var:
    var.append([var.text for var in var.findAll('string')[:-1]])

template_302 10
	 def latest_date(dates):
    """string"""
    return max(var[[(not pd.isnull(var)) for var in var]])

	 def earliest_date(dates):
    """string"""
    return min(var[[(not pd.isnull(var)) for var in var]])

	 def tableToList(self, soup):
    """string"""
    return var.find('string').find_all('string')

	 def run_query(uni_name):
    """string"""
    return requests.get(url='string', params={'string': var, 'string':
        'string', 'string': 'string', 'string': 'string', 'string': 'string'})

	 def functionAttrsFor(df, colname):
    """string"""
    return var.groupby(var).agg(lambda col: var.nunique(dropna=False) == 1
        ).apply(all)

template_291 16
	 pd.notnull(var['string']).sum() / len(var)

	 var[var['string'].isnull()]['string'].sum() / var['string'].sum()

	 var[var['string'].isnull()]['string'].sum() / var['string'].sum()

	 var[var['string'] == 'string']['string'].sum() / var['string'].sum()

	 len(var) / len(var)

template_289 17
	 stats.mannwhitneyu(var.ydiff, var.ydiff, alternative='string').pvalue

	 var.groupby('string').count().civility

	 stats.mannwhitneyu(var.months, var.months, alternative='string').pvalue

	 stats.ttest_ind(a=var, b=var, equal_var=False).pvalue

	 stats.ttest_ind(a=var, b=var, equal_var=False).pvalue

template_264 19
	 var += var

	 var += var

	 var += var

	 var += var

	 var += var

template_263 39
	 var += str(var)

	 var += str(var)

	 var += var['string' + str(var + 1)].fillna(0)

	 var += var['string'].fillna(0)

	 var += var['string'].fillna(0)

template_2597 9
	 for var in set(var.values()):
    var = var + 1.0
    var = [var for var in var.keys() if var[var] == var]
    nx.draw_networkx_nodes(var, var, var, node_size=150, node_color=str(var /
        var))

	 for var in set(var.values()):
    var = var + 1.0
    var = [var for var in var.keys() if var[var] == var]
    nx.draw_networkx_nodes(var, var, var, node_size=30, node_color=str(var /
        var))

	 for var in set(var.values()):
    var = var + 1.0
    var = [var for var in var.keys() if var[var] == var]
    nx.draw_networkx_nodes(var, var, var, node_size=20, node_color=str(var /
        var))

	 for var in set(var.values()):
    var = var + 1.0
    var = [var for var in var.keys() if var[var] == var]
    nx.draw_networkx_nodes(var, var, var, node_size=20, node_color=str(var /
        var))

	 for var in set(var.values()):
    var = var + 1.0
    var = [var for var in var.keys() if var[var] == var]
    nx.draw_networkx_nodes(var, var, var, node_size=20, node_color=str(var /
        var))

template_2592 11
	 var = var[var['string'] != 'string']['string']

	 var = var[var['string'] != 'string']['string']

	 var = var[var['string'] != 'string']['string']

	 var = var[var['string'] != 'string']['string']

	 var = var[var['string'] != 'string']['string']

template_2590 26
	 var = var[var['string'] == 'string'].index

	 var = var[var['string'] == 'string'].index

	 var = var[var['string'] == 'string'].index

	 var = var[var['string'] == 'string'].index

	 var = var[var['string'] == 'string'].index

template_2586 12
	 var = var[(var['string'] == 'string') & (var['string'] == 'string')]

	 var = var[(var['string'] == 'string') & (var['string'] == 'string')]

	 var = var[(var['string'] == 'string') & (var['string'] == 'string')]

	 var = var[(var['string'] == 'string') & (var['string'] == 'string')]

	 var = var[(var['string'] == 'string') & (var['string'] == 'string')]

template_2571 9
	 for var in var['string']:
    if var in var:
        var.loc[var, 'string'] += 1

	 for var in var['string']:
    if var in var:
        var.loc[var, 'string'] += 1

	 for var in var['string']:
    if var in var:
        var.loc[var, 'string'] += 1

	 for var in var['string']:
    if var in var:
        var.loc[var, 'string'] += 1

	 for var in var['string']:
    if var in var:
        var.loc[var, 'string'] += 1

template_2552 14
	 var = var[var.freq < 1000]

	 var = var[var.freq < 10]

	 var = var[var.freq < 10]

	 var = var[var.freq < 10]

	 var = var[var.freq < 1000]

template_2550 11
	 var = var.loc[var['string'] == 'string']

	 var = var.loc[var['string'] == 'string']

	 var = var.loc[var['string'] == 'string']

	 var = var.loc[var['string'] == 'string']

	 var = var.loc[var['string'] == 'string']

template_2549 11
	 var = var[var['string'] == 1]

	 var = var[var['string'] == 0]

	 var = var[var['string'] == 1]

	 var = var[var['string'] == 0]

	 var = var[var['string'] == 0]

template_2547 9
	 var = var[var.games >= 40]

	 var = var[var.freq >= 10]

	 var = var[var.freq >= 10]

	 var = var[var.freq >= 10]

	 var = var[var.freq >= 10]

template_2542 10
	 var = var[var['string'] > 2]

	 var = var[var['string'] > 2]

	 var = var[var['string'] > 50000.0]

	 var = var[var['string'] > 15]

	 var = var[var['string'] > 15]

template_2538 15
	 var = var[var.Semester == 1]

	 var = var[var.semester == 1]

	 var = var[var.semester == 6]

	 var = var[var.semester == 1]

	 var = var[var.semester == 2]

template_2536 9
	 var = var[var['string'] >= 3]

	 var = var[var['string'] >= 36]

	 var = var[var['string'] >= 2007]

	 var = var[var['string'] >= 2007]

	 var = var[var['string'] >= 6]

template_2383 23
	 var.iloc[var, 7] = 'string'

	 var.iloc[var, 7] = 'string'

	 var.iloc[var, 7] = 'string'

	 var.iloc[var, 7] = 'string'

	 var.iloc[var, 7] = 'string'

template_2381 14
	 var = var.iloc[var]['string']

	 var = var.iloc[var]['string']

	 var = var.iloc[var]['string']

	 var = var.iloc[var]['string']

	 var = var.loc[var]['string']

template_2352 11
	 var = [var.get_group(var) for var in var.groups]

	 var = [var.get_group(var) for var in var.groups]

	 var = [var.get_group(var) for var in var.groups]

	 var = [var.get_group(var) for var in var.groups]

	 var = [format(var) for var in var.Documents]

template_2348 9
	 pd.options.mode.chained_assignment = None

	 pd.options.mode.chained_assignment = None

	 pd.options.mode.chained_assignment = None

	 pd.options.mode.chained_assignment = None

	 pd.options.mode.chained_assignment = None

template_2342 68
	 var.loc[var.Institution.str.contains('string') == True, 'string'] = 'string'

	 var.loc[var.Institution.str.contains('string') == True, 'string'] = 'string'

	 var.loc[var.Institution.str.contains('string', case=0) == True, 'string'
    ] = 'string'

	 var.loc[var.Institution.str.contains('string') == True, 'string'] = 'string'

	 var.loc[var.Institution.str.contains('string') == True, 'string'] = 'string'

template_2340 14
	 var = [var for var in var if var not in var.Canton.unique()]

	 var = [var for var in var if var.lower() not in var]

	 var = [var for var in var if var.lower() not in var]

	 var = [var for var in var if var.lower() not in var]

	 var = [var for var in var if var.lower() not in var]

template_2328 30
	 var = [var for var in var if var not in var]

	 var = [var for var in var if var not in var]

	 var = [var for var in var if var not in var]

	 var = [var for var in var if var not in var]

	 var = [var for var in var if var not in var]

template_2326 20
	 var.loc[var['string'].str.contains('string') & var['string'].isnull(), 'string'
    ] = 'string'

	 var.loc[var['string'].str.contains('string') & var['string'].isnull(), 'string'
    ] = 'string'

	 var.loc[var['string'].str.contains('string') & var['string'].isnull(), 'string'
    ] = 'string'

	 var.loc[var['string'].str.contains('string') & var['string'].isnull(), 'string'
    ] = 'string'

	 var.loc[var['string'].str.contains('string') & var['string'].isnull(), 'string'
    ] = 'string'

template_2178 15
	 for var in var:
    var[var] = np.array(var.get_group(var)['string'])

	 for var in var:
    var[var] = sorted(glob(var % var))

	 for var in var:
    var[var] = var(var)

	 for var in var:
    var[var] = var(var)

	 for var in var:
    var[var] = pd.Series(0, index=var.index)

template_2167 9
	 for var, var in var.items():
    var[var] = var(var, var)

	 for var, var in var.iterrows():
    var[var] = var.polarity_scores(var(var.text))

	 for var, var in var.iterrows():
    var[var] = var(var(var.text))

	 for var, var in var.items():
    var[var] = var.polarity_scores(var[var])

	 for var, var in var.items():
    var[var] = len(var)

template_1902 9
	 var = {var.submit(var, var): var for var in var}

	 var = {var.submit(var, var): var for var in var}

	 var = {var: dict() for var in var}

	 var = {var: dict() for var in var}

	 var = {var: var(var + 'string') for var in var}

template_1873 11
	 var = [var for var in var if len(var) > 2]

	 var = [var for var in var if len(var) > 1]

	 var = [var for var in var if len(var) > 1]

	 var = [var for var in var if len(var) > 2]

	 var = [var for var in var if len(var) > 1]

template_1871 14
	 var = var.body.table

	 var = var.index.values

	 var = var.index.values

	 var = var.columns.values

	 var = var.columns.values

template_1864 43
	 var = var.loc[var]

	 var = var.loc[var]

	 var = var.loc[var]

	 var = var.loc[var]

	 var = var.loc[var]

template_1853 79
	 var = var[var]

	 var = var[var]

	 var = var[var]

	 var = var[var]

	 var = var[var]

template_1837 13
	 var.loc['string'] = 1

	 var.loc['string'] = 1

	 var.loc['string'] = 1

	 var.loc['string'] = 1

	 var.loc['string'] = 1

template_1825 11
	 var = var.findAll('string', {'string': 'string'})[0]['string']

	 var = var.findAll('string', {'string': 'string'})[0]['string']

	 var = var.findAll('string', class_='string')[1]['string']

	 var = var.find_all('string', attrs={'string': 'string'})[0]['string']

	 var = var.find_all('string', attrs={'string': 'string'})[0]['string']

template_1821 9
	 var['string'] = var[var]

	 var['string'] = var[var]

	 var['string'] = var[var]

	 var['string'] = var[var]

	 var['string'] = var[var]

template_1806 41
	 var = var.shape[0]

	 var = var.shape[0]

	 var = var.loc[3]

	 var = var.contents[0]

	 var = var.shape[0]

template_1795 23
	 var.loc[var.Canton.isin(['string', 'string', 'string', 'string', 'string',
    'string', 'string']), 'string'] = 'string'

	 var.loc[var.Canton.isin(['string', 'string', 'string']), 'string'] = 'string'

	 var.loc[var.Canton.isin(['string', 'string', 'string', 'string', 'string',
    'string', 'string', 'string']), 'string'] = 'string'

	 var.loc[var.Canton.isin(['string']), 'string'] = 'string'

	 var.loc[var.Canton.isin(['string', 'string', 'string', 'string', 'string']),
    'string'] = 'string'

template_1792 12
	 var = var[:var]

	 var = var[:var]

	 var = var[:var]

	 var = var[:var]

	 var = var[:var]

template_1696 18
	 var.shape[0]

	 var.shape[0]

	 var.shape[0]

	 var.shape[0]

	 var.shape[0]

template_1692 24
	 plt.style.available

	 var.index.is_unique

	 var.index.is_unique

	 var.index.is_unique

	 var.index.is_unique

template_1086 20
	 with open('string', 'string') as var:
    pickle.dump(var, var)

	 with open('string', 'string') as var:
    pickle.dump(var, var)

	 with open(var, 'string') as var:
    pickle.dump(var, var)

	 with open('string', 'string') as var:
    json.dump(var, var)

	 with open('string', 'string') as var:
    json.dump(var, var)

template_1071 10
	 var[0] = 22

	 var[1] = 9

	 var[0] = 22

	 var[1] = 9

	 var[0] = 24

template_1067 16
	 var.columns = [['string', 'string', 'string', 'string']]

	 var.columns = [['string', 'string', 'string', 'string']]

	 var.columns = [['string', 'string', 'string', 'string']]

	 var.columns = [['string', 'string', 'string', 'string']]

	 var.columns = [['string', 'string', 'string', 'string']]

template_1066 25
	 var = var / max(var)

	 var = var / max(var)

	 var = sum([var['string'] for var in var.values() if var['string'] == 1]) / var

	 var = sum([var['string'] for var in var.values() if var['string'] == 0]) / var

	 var = sum([(var['string'] + var.get(var, {}).get('string', 0)) for var, var in
    var.items() if var['string'] == 1]) / var

template_1054 20
	 var = var + var

	 var = var + var

	 var = var + var

	 var = var + var

	 var = var + var

template_1053 20
	 var = var['string'].unique()[1:]

	 var = var.findAll('string')[1:]

	 var = var.findAll('string', {'string': 'string'})[0].findChildren()[1:]

	 var = var.findAll('string', {'string': 'string'})[0].findChildren()[1:]

	 var = var.findAll('string', {'string': 'string'})[0].findChildren()[1:]

template_1052 25
	 var['string'] = var['string']

	 var['string'] = var['string']

	 var['string'] = var['string']

	 var['string'] = var['string']

	 var['string'] = var['string']

template_1051 9
	 var[var[var][10].get_text().replace('string', 'string')] = 1

	 var[var['string'].isnull()] = 0

	 var[var['string'].isnull()] = 0

	 var[var['string'].isnull()] = 0

	 var[var['string'].isnull()] = 0

template_1045 9
	 var = [var, var, var, var]

	 var = [var, var, var, var, var]

	 var = [var, var]

	 var = [var, var, var, var]

	 var = [var, var, var, var]

template_1030 19
	 var.index = var

	 var.index = var

	 var.columns = var

	 var.columns = var

	 var.columns = var

template_1028 39
	 var = {'string': 'string', 'string': 'string', 'string': 'string', 'string':
    'string', 'string': var, 'string': var, 'string': var, 'string': var,
    'string': var, 'string': var, 'string': var, 'string': var, 'string':
    'string'}

	 var = {'string': 'string', var: sum}

	 var = {'string': var, 'string': var, 'string': var}

	 var = {'string': var, 'string': var}

	 var = {'string': var, 'string': var, 'string': var}

template_1023 10
	 var['string'] = None

	 var['string'] = None

	 var['string'] = None

	 var['string'] = None

	 var['string'] = None

template_1022 20
	 var = int(var[:4]) + var

	 var = var('string') + var

	 var = var('string') + var

	 var = var + pd.Series(len(var) * [0.5])

	 var = var + pd.Series(len(var) * [0.5])

template_1018 9
	 var = [0] * len(var['string'])

	 var = [0] * len(var['string'])

	 var = [0] * len(var)

	 var = [0] * len(var)

	 var = [0] * len(var)

template_1016 71
	 var = var.find_all('string')[0]

	 var = pd.read_html(var.decode())[0]

	 var = var.find_all('string')[0]

	 var = pd.read_html(var.decode())[0]

	 var = var.split('string')[1].split('string')[1]

template_1015 9
	 var = 2 * var

	 var = var * 6

	 var = 6 * var

	 var = 6 * var

	 var = 6 * var

template_1013 30
	 var = 'string' + var

	 var = var + 'string'

	 var = var + 'string'

	 var = var + 'string'

	 var = 'string' + var

template_1010 9
	 var = var - 1

	 var = var - 1

	 var = 1 - var

	 var = 1 - var

	 var = 1 - var

template_1003 28
	 var = var[2:]

	 var = var[3:]

	 var = var[3:]

	 var = var[3:]

	 var = var[3:]

